library(tidyverse)
Registered S3 method overwritten by 'dplyr':
method from
print.rowwise_df
Registered S3 methods overwritten by 'dbplyr':
method from
print.tbl_lazy
print.tbl_sql
[37m── [1mAttaching packages[22m ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── tidyverse 1.3.0 ──[39m
[37m[32m✓[37m [34mggplot2[37m 3.2.1 [32m✓[37m [34mpurrr [37m 0.3.3
[32m✓[37m [34mtibble [37m 2.1.3 [32m✓[37m [34mdplyr [37m 0.8.4
[32m✓[37m [34mtidyr [37m 1.0.2 [32m✓[37m [34mstringr[37m 1.4.0
[32m✓[37m [34mreadr [37m 1.3.1 [32m✓[37m [34mforcats[37m 0.4.0[39m
[37m── [1mConflicts[22m ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── tidyverse_conflicts() ──
[31mx[37m [34mdplyr[37m::[32mfilter()[37m masks [34mstats[37m::filter()
[31mx[37m [34mdplyr[37m::[32mlag()[37m masks [34mstats[37m::lag()[39m
library(lme4)
Loading required package: Matrix
Attaching package: ‘Matrix’
The following objects are masked from ‘package:tidyr’:
expand, pack, unpack
library(lmerTest)
Attaching package: ‘lmerTest’
The following object is masked from ‘package:lme4’:
lmer
The following object is masked from ‘package:stats’:
step
library(plotrix)
library(stringr)
library(readxl)
library(RColorBrewer)
library(mvtnorm)
library(mgcv)
Loading required package: nlme
Attaching package: ‘nlme’
The following object is masked from ‘package:lme4’:
lmList
The following object is masked from ‘package:dplyr’:
collapse
This is mgcv 1.8-31. For overview type 'help("mgcv-package")'.
# Total held-out log-likelihood of test_y under a previously fitted model.
# Predictions are population-level: re.form = NA drops random effects for
# lme4-style models, and is silently ignored (via ...) by plain lm fits.
logLik_test <- function(lm, test_X, test_y) {
  # Point predictions for the held-out rows.
  mu <- predict(lm, test_X, re.form = NA)
  # Residual standard deviation, as estimated on the training data.
  resid_sd <- sigma(lm)
  # Gaussian log-density of each observation, summed over the test set.
  sum(dnorm(test_y, mu, resid_sd, log = TRUE))
}
# Per-observation held-out log-likelihoods: one Gaussian log-density per
# prediction--observation pair, with no final reduction.
logLik_test_per <- function(lm, test_X, test_y) {
  # Population-level predictions (re.form = NA; ignored by plain lm fits).
  mu <- predict(lm, test_X, re.form = NA)
  # Residual standard deviation estimated from the training data.
  resid_sd <- sigma(lm)
  dnorm(test_y, mu, resid_sd, log = TRUE)
}
# Mean squared prediction error of a fitted model on held-out data.
mse_test <- function(lm, test_X, test_y) {
  err <- predict(lm, test_X, re.form = NA) - test_y
  mean(err ^ 2)
}
#Sanity checks
#mylm <- gam(psychometric ~ s(surprisal, bs = "cr", k = 20) + s(prev_surp, bs = "cr", k = 20) + te(freq, len, bs = "cr") + te(prev_freq, prev_len, bs = "cr"), data=train_data)
#c(logLik(mylm), logLik_test(mylm, train_data, train_data$psychometric))
#logLik_test(mylm, test_data, test_data$psychometric)
# Load harmonized per-token results and derive lagged (spillover) predictors.
data <- read.csv("../data/harmonized_results.csv")
all_data <- data %>%
  mutate(seed = as.factor(seed)) %>%
  # Compute lags within each corpus--model--training--seed series so that
  # spillover predictors never leak across groups.
  # (A duplicated `prev_surp = lag(surprisal)` assignment was removed; the
  # second assignment recomputed the identical value.)
  group_by(corpus, model, training, seed) %>%
  mutate(prev_surp = lag(surprisal),
         prev_code = lag(code),
         prev_len = lag(len),
         prev_freq = lag(freq),
         prev2_freq = lag(prev_freq),
         prev2_code = lag(prev_code),
         prev2_len = lag(prev_len),
         prev2_surp = lag(prev_surp),
         prev3_freq = lag(prev2_freq),
         prev3_code = lag(prev2_code),
         prev3_len = lag(prev2_len),
         prev3_surp = lag(prev2_surp)) %>%
  ungroup() %>%
  # Keep only tokens whose preceding context is contiguous, judged by the
  # `code` position index: two-back for dundee, one-back for all other corpora.
  filter((corpus == "dundee" & code == prev2_code + 2) | (corpus != "dundee" & code == prev_code + 1)) %>%
  select(-prev_code, -prev2_code, -prev3_code) %>%
  drop_na()
# Normalize the two spellings of the GPT-2 model name.
all_data <- all_data %>%
  mutate(
    model = as.character(model),
    model = if_else(model == "gpt-2", "gpt2", model),
    model = as.factor(model))
# Keep an unshuffled copy of the cleaned data.
all_data_safe <- all_data
# Compute linear model stats for the given training data subset and full test data.
# Automatically subsets the test data to match the relevant group for which we
# are training a linear model (same training/model/seed/corpus cell).
# The fitted lm is stashed in `store_env` under a per-cell, per-fold name so
# residuals and per-example deltas can be recovered later.
# Returns a one-row summary: train log-likelihood plus held-out log-likelihood
# and MSE on the matching test slice.
get_lm_data <- function(df, test_data, formula, fold, store_env) {
  # An earlier iteration of this analysis fit a GAM here:
  #this_lm <- gam(formula, data=df);
  this_lm <- lm(formula, data = df)
  this_test_data <- semi_join(test_data, df, by = c("training", "model", "seed", "corpus"))
  # Key the stored model by its data cell and CV fold.
  lm_name <- paste(unique(paste(df$model, df$training, df$seed, df$corpus))[1], fold)
  assign(lm_name, this_lm, envir = store_env)
  summarise(df,
            log_lik = as.numeric(logLik(this_lm, REML = FALSE)),
            test_lik = logLik_test(this_lm, this_test_data, this_test_data$psychometric),
            test_mse = mse_test(this_lm, this_test_data, this_test_data$psychometric))
}
# For a previously fitted lm stored in store_env, attach per-row held-out
# likelihoods and raw residuals for the given data subset.
get_lm_residuals <- function(df, fold, store_env) {
  # Reconstruct the storage key used when the model was fitted (cell + fold).
  key <- paste(unique(paste(df$model, df$training, df$seed, df$corpus))[1], fold)
  fitted_lm <- get(key, envir = store_env)
  mutate(df,
         likelihood = logLik_test_per(fitted_lm, df, df$psychometric),
         resid = df$psychometric - predict(fitted_lm, df, re.form = NA))
}
# Per-example delta-log-likelihood (full model minus baseline) for one test fold.
get_lm_delta_log_lik <- function(test_data, fold, baseline_env, full_env) {
  # Both environments index their fitted models by the same cell-and-fold key.
  key <- paste(unique(paste(test_data$model, test_data$training, test_data$seed, test_data$corpus))[1], fold)
  baseline_lm <- get(key, envir = baseline_env)
  full_lm <- get(key, envir = full_env)
  obs <- test_data$psychometric
  delta_log_lik <- logLik_test_per(full_lm, test_data, obs) - logLik_test_per(baseline_lm, test_data, obs)
  cbind(test_data, delta_log_lik = delta_log_lik)
}
#####
# Define regression formulae.
# NOTE(review): the active lm formulae below use 3-back spillover features for
# the eye-tracking (rt) regressions and only 1-back features for the SPRT
# regressions; the earlier description ("eye-tracking: surprisal + previous
# surprisal; SPRT: 2-back features") matches the commented-out gam versions,
# not these -- confirm which configuration is intended.
#baseline_rt_regression = psychometric ~ te(freq, len, bs = "cr") + te(prev_freq, prev_len, bs = "cr")
#baselie_sprt_regression = psychometric ~ te(freq, len, bs = "cr") + te(prev_freq, prev_len, bs = "cr") + te(prev2_freq, prev2_len, bs = "cr")
#full_rt_regression = (psychometric ~ s(surprisal, bs = "cr", k = 20) + s(prev_surp, bs = "cr", k = 20)
#+ te(freq, len, bs = "cr") + te(prev_freq, prev_len, bs = "cr"))
#full_sprt_regression = (psychometric ~ s(surprisal, bs = "cr", k = 20) + s(prev_surp, bs = "cr", k = 20) + s(prev2_surp, bs = "cr", k = 20)
#+ te(freq, len, bs = "cr") + te(prev_freq, prev_len, bs = "cr") + te(prev2_freq, prev2_len, bs = "cr"))
# Baseline regressions: length/frequency controls only (no surprisal terms).
baseline_rt_regression = psychometric ~ freq + prev_freq + prev2_freq + prev3_freq + len + prev_len + prev2_len + prev3_len
baseline_sprt_regression = psychometric ~ freq + prev_freq + len + prev_len
# Full regressions: the same controls plus surprisal spillover terms.
full_rt_regression = psychometric ~ surprisal + prev_surp + prev2_surp + prev3_surp + freq + prev_freq + prev2_freq + prev3_freq + len + prev_len + prev2_len + prev3_len
full_sprt_regression = psychometric ~ surprisal + prev_surp + freq + prev_freq + len + prev_len
#####
# Prepare frames/environments for storing results/objects.
baseline_results = data.frame()
full_model_results = data.frame()
#baseline_residuals = data.frame()
#full_residuals = data.frame()
log_lik_deltas = data.frame()
# Randomly shuffle the data. NOTE(review): no set.seed() call precedes this,
# so fold assignment is not reproducible across runs -- confirm acceptable.
all_data<-all_data[sample(nrow(all_data)),]
# Create K equally sized folds.
K = 5
folds <- cut(seq(1,nrow(all_data)),breaks=K,labels=FALSE)
# Perform K-fold (K = 5) cross-validation.
# Fit models for some fold of the data.
# Fit the corpus-appropriate *baseline* regression (controls only) for one
# data cell. `corpus` arrives as unique(.$corpus); index its first element so
# the scalar if() condition is well-defined even if the vector has length > 1,
# matching how full_model_corpus guards the same test.
baseline_corpus = function(corpus, df, test_data, fold, env) {
  if (corpus[1] == "dundee") {
    get_lm_data(df, test_data, baseline_rt_regression, fold, env)
  } else {
    get_lm_data(df, test_data, baseline_sprt_regression, fold, env)
  }
}
# Fit the corpus-appropriate *full* regression (surprisal + controls) for one
# data cell: dundee gets the rt formula, every other corpus the SPRT formula.
full_model_corpus = function(corpus, df, test_data, fold, env) {
  chosen_formula <- if (corpus[1] == "dundee") full_rt_regression else full_sprt_regression
  get_lm_data(df, test_data, chosen_formula, fold, env)
}
# Prepare a new Environment in which we store fitted LMs, which we'll query
# later for residuals and other metrics.
baseline_env = new.env()
full_env = new.env()
for (i in seq_len(K)) {
  # Segment the data by fold using the which() function.
  testIndexes <- which(folds == i, arr.ind = TRUE)
  test_data <- all_data[testIndexes, ]
  train_data <- all_data[-testIndexes, ]
  # Compute a baseline linear model for each model--training--seed--RT-corpus
  # combination. (A leftover debugging `print(model)` step was removed from
  # this pipeline; it only dumped the grouped frame to the console.)
  baselines = train_data %>%
    group_by(model, training, seed, corpus) %>%
    do(baseline_corpus(unique(.$corpus), ., test_data, i, baseline_env)) %>%
    ungroup() %>%
    mutate(seed = as.factor(seed),
           fold = i)
  baseline_results = rbind(baseline_results, baselines)
  # Compute a full linear model for each model--training--seed--RT-corpus combination.
  full_models = train_data %>%
    group_by(model, training, seed, corpus) %>%
    do(full_model_corpus(unique(.$corpus), ., test_data, i, full_env)) %>%
    ungroup() %>%
    mutate(seed = as.factor(seed),
           fold = i)
  full_model_results = rbind(full_model_results, full_models)
  # Compute per-example delta-log-likelihoods on the held-out fold.
  fold_log_lik_deltas = test_data %>%
    group_by(model, training, seed, corpus) %>%
    do(get_lm_delta_log_lik(., i, baseline_env, full_env)) %>%
    ungroup()
  log_lik_deltas = rbind(log_lik_deltas, fold_log_lik_deltas)
  # fold_baseline_residuals = test_data %>%
  #   group_by(model, training, seed, corpus) %>%
  #   do(get_lm_residuals(., i, baseline_env)) %>%
  #   ungroup()
  #
  # baseline_residuals = rbind(baseline_residuals, fold_baseline_residuals)
  #
  # fold_full_residuals = test_data %>%
  #   group_by(model, training, seed, corpus) %>%
  #   do(get_lm_residuals(., i, full_env)) %>%
  #   ungroup()
  #
  # full_residuals = rbind(full_residuals, fold_full_residuals)
}
|===================================================================================================================== | 67% ~1 s remaining
|=========================================================================================================================== | 71% ~1 s remaining
|================================================================================================================================= | 74% ~1 s remaining
|======================================================================================================================================= | 78% ~1 s remaining
|============================================================================================================================================= | 81% ~1 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|===============================================================================================================================================================================|100% ~0 s remaining
|==================================================================================================================================== | 76% ~1 s remaining
|======================================================================================================================================= | 78% ~1 s remaining
|============================================================================================================================================= | 81% ~1 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|========================================================================================================================================== | 79% ~1 s remaining
|============================================================================================================================================= | 81% ~0 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|======================================================================================================================================= | 78% ~1 s remaining
|============================================================================================================================================= | 81% ~1 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|============================================================================================================================================================ | 90% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|============================================================================================================================================= | 81% ~0 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|======================================================================================================================================= | 78% ~1 s remaining
|============================================================================================================================================= | 81% ~1 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|======================================================================================================================================= | 78% ~1 s remaining
|============================================================================================================================================= | 81% ~0 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|======================================================================================================================================= | 78% ~1 s remaining
|============================================================================================================================================= | 81% ~1 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|====================================================================================================================================================== | 86% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|============================================================================================================================================= | 81% ~0 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
|======================================================================================================================== | 69% ~1 s remaining
|=========================================================================================================================== | 71% ~1 s remaining
|================================================================================================================================= | 74% ~1 s remaining
|======================================================================================================================================= | 78% ~1 s remaining
|============================================================================================================================================= | 81% ~1 s remaining
|=================================================================================================================================================== | 84% ~0 s remaining
|========================================================================================================================================================= | 88% ~0 s remaining
|=============================================================================================================================================================== | 91% ~0 s remaining
|===================================================================================================================================================================== | 95% ~0 s remaining
|=========================================================================================================================================================================== | 98% ~0 s remaining
#write.csv(full_residuals, "../data/analysis_checkpoints/full_residuals.csv")
#write.csv(baseline_residuals, "../data/analysis_checkpoints/baseline_residuals.csv")
# Collapse per-example deltas to one mean/SEM per model--training--seed--corpus.
model_deltas = log_lik_deltas %>%
group_by(model, training, seed, corpus) %>%
summarise(mean_delta_log_lik = mean(delta_log_lik),
sem_delta_log_lik = sd(delta_log_lik) / sqrt(length(delta_log_lik)))
# Checkpoint the per-fold results.
write.csv(full_model_results, "../data/analysis_checkpoints/full_model_result.csv")
write.csv(baseline_results, "../data/analysis_checkpoints/baseline_results.csv")
# NOTE(review): the file written above is "full_model_result.csv" (no trailing
# "s"), while the commented-out readers below reference "ffull_model_results.csv"
# and "fbaseline_resultsb.csv" -- none of these names agree with the writers;
# confirm the intended checkpoint filenames before reusing them.
#full_model_results = read.csv("../data/analysis_checkpoints/ffull_model_results.csv")
#baseline_results = read.csv("../data/analysis_checkpoints/fbaseline_resultsb.csv")
# all_residuals = full_residuals %>%
# select(model, training, seed, corpus, code, likelihood) %>%
# right_join((baseline_residuals %>% select(model, training, seed, corpus, code, likelihood)),
# suffix=c(".full", ".baseline"),
# by=c("model", "training", "seed", "corpus"))
#
# # Join baseline models with full models and compare performance within-fold.
# model_fold_deltas = baseline_results %>%
# right_join(full_model_results, suffix=c(".baseline", ".full"),
# by=c("model", "training", "seed", "corpus", "fold")) %>%
#
# mutate(seed = as.factor(seed)) %>%
#
# # Compute per-fold deltas.
# group_by(model, training, seed, corpus, fold) %>%
# mutate(delta_log_lik = test_lik.full - test_lik.baseline,
# delta_mse = test_mse.full - test_mse.baseline) %>%
# ungroup() %>%
# select(model, training, seed, corpus, fold,
# delta_log_lik, delta_mse)
#
# # Now compute across-fold delta statistics for each model--training--seed--corpus.
# model_deltas = model_fold_deltas %>%
# group_by(model, training, seed, corpus) %>%
# summarise(mean_delta_log_lik = sum(delta_log_lik),
# sem_delta_log_lik = sd(delta_log_lik) / sqrt(length(delta_log_lik)),
# mean_delta_mse = sum(delta_mse),
# sem_delta_mse = sd(delta_mse) / sqrt(length(delta_mse)))
# Axis label for the metric analyzed below; to switch metrics, change both this
# label and the commented/uncommented mutate() alternatives further down.
metric <- "ΔLogLik"
#metric <- "-ΔMSE"
# # Select the relevant metric.
# model_fold_deltas = model_fold_deltas %>%
# # Retrieve the current test metric
# mutate(delta_test = delta_log_lik) %>%
# select(-delta_log_lik, -delta_mse)
# # Select the relevant metric.
# Map the chosen metric onto generic delta_test_* columns used by all plots.
model_deltas = model_deltas %>%
# Retrieve the current test metric
mutate(delta_test_mean = mean_delta_log_lik,
delta_test_sem = sem_delta_log_lik) %>%
# mutate(delta_test_mean = mean_delta_mse,
# delta_test_sem = sem_delta_mse)
# Remove the raw metrics.
select(-mean_delta_log_lik, -sem_delta_log_lik,
#-mean_delta_mse, -sem_delta_mse
)
model_deltas
# Sanity check: training on train+test data should yield improved performance over training on just training data. (When evaluating on test data.)
# full_baselines = all_data %>%
# group_by(model, training, seed, corpus) %>%
# summarise(baseline_train_all_test_lik = logLik_test(lm(psychometric ~ len + freq + sent_pos, data=.), semi_join(test_data, ., by=c("training", "model", "seed", "corpus")), semi_join(test_data, ., by=c("training", "model", "seed", "corpus"))$psychometric)) %>%
# ungroup()
# full_baselines
#
# full_baselines %>%
# right_join(baselines, by=c("seed", "training", "model", "corpus")) %>%
# mutate(delta=baseline_train_all_test_lik-baseline_test_lik) %>%
# select(-baseline_lik) # %>%
# #select(-baseline_test_lik, -baseline_train_all_test_lik, -baseline_lik, -baseline_test_mse)
# Per-model metadata, normalized to align with all_data: fold the "gpt-2"
# spelling into "gpt2", derive a train_size value from the training-corpus name
# prefix, and collapse duplicate (model, training, seed) rows.
language_model_data <- read.csv("../data/model_metadata.csv") %>%
  mutate(model = as.character(model),
         model = if_else(model == "gpt-2", "gpt2", model),
         model = as.factor(model),
         train_size = case_when(str_starts(training, "bllip-lg") ~ 42,
                                str_starts(training, "bllip-md") ~ 15,
                                str_starts(training, "bllip-sm") ~ 5,
                                str_starts(training, "bllip-xs") ~ 1),
         seed = as.factor(seed)) %>%
  select(-pid, -test_loss) %>%
  distinct(model, training, seed, .keep_all = TRUE)
# Sanity check: seed distribution in the metadata table.
table(language_model_data$seed)
0 111 120 922 1111 3602 4301 7245 7877 28066 28068 44862 51272 64924 1581807512 1581807578 1581861474 1581955288
4 7 6 5 4 1 1 1 1 1 1 1 1 1 1 1 1 1
1582126320 1586986276 1587139950
1 1 1
table(model_deltas$seed)
111 120 922 1111 3602 4301 7245 7877 28066 28068 44862 51272 64924 1581807512 1581807578 1581861474 1581955288 1582126320
6 6 6 8 2 2 2 2 2 2 2 2 2 2 2 2 2 2
1586986276 1587139950
2 2
First, join the delta-metric data with the model auxiliary (metadata) table.
# Join delta metrics with model metadata. The outer join (all = TRUE) followed
# by drop_na() keeps only cells present in both tables. (`all=T` replaced with
# the literal TRUE; T is a reassignable binding.)
model_deltas = model_deltas %>%
  merge(language_model_data, by = c("seed", "training", "model"), all = TRUE) %>%
  drop_na()
model_deltas
We could also join against the original per-fold linear-model results, rather than collapsing to delta metrics; this would support later regressions that do not collapse across folds.
# Exclude the ordered-neurons model from all downstream analyses.
model_deltas <- filter(model_deltas, model != "ordered-neurons")
# Scatter of predictive-power delta vs. SG score, one panel per corpus, with
# per-point SEM error bars and a per-panel linear trend line.
# NOTE(review): "bllip-sm-gptbpe" and "bllip-xs-gptbpe" are both mapped to
# "#CCCCCC" below -- confirm the duplicate color is intentional.
model_deltas %>%
ggplot(aes(x=sg_score, y=delta_test_mean)) +
geom_errorbar(aes(ymin=delta_test_mean-delta_test_sem, ymax=delta_test_mean+delta_test_sem)) +
geom_smooth(method="lm", se=T) +
geom_point(stat="identity", position="dodge", alpha=1, size=3, aes(color=training, shape=model)) +
ylab(metric) +
xlab("Syntax Generalization Score") +
ggtitle("Syntactic Generalization vs. Predictive Power") +
scale_color_manual(values = c("bllip-lg"="#440154FF",
"bllip-md"="#39568CFF",
"bllip-sm"="#1F968BFF",
"bllip-xs"="#73D055FF",
"bllip-lg-gptbpe"="#888888",
"bllip-md-gptbpe"="#AAAAAA",
"bllip-sm-gptbpe"="#CCCCCC",
"bllip-xs-gptbpe"="#CCCCCC")) +
facet_grid(~corpus, scales="free") +
theme(axis.text=element_text(size=14),
strip.text.x = element_text(size=14),
legend.text=element_text(size=14),
axis.title=element_text(size=18),
legend.position = "bottom")
#ggsave("./cogsci_images/sg_loglik.png",height=5,width=6)
We control for effects of perplexity by relating the residuals of a performance ~ PPL regression to SG score.
# Residualize both the delta metric and the SG score against test perplexity,
# so the later plot/regressions relate the PPL-free components of each.
d_resid = model_deltas %>%
drop_na() %>%
# Residualize the delta metric w.r.t. PPL within each corpus; the
# training:test_ppl interaction gives each training set its own PPL slope.
group_by(corpus) %>%
mutate(resid.delta = resid(lm(delta_test_mean ~ training:test_ppl))) %>%
ungroup() %>%
# Residualize SG score w.r.t. PPL for each training group
group_by(training) %>%
# NB no need for training:ppl interaction, since we're within-group.
mutate(resid.sg = resid(lm(sg_score ~ test_ppl))) %>%
ungroup() %>%
# Compute summary statistics across model--training--seed--corpus.
group_by(model, training, corpus, seed) %>%
summarise(resid.delta.mean = mean(resid.delta),
resid.delta.sem = sd(resid.delta) / sqrt(length(resid.delta)),
resid.sg.mean = mean(resid.sg),
resid.sg.sem = sd(resid.sg) / sqrt(length(resid.sg)))
# Now plot residual vs SG: residual delta metric against residual SG score
# (both PPL-residualized above), faceted by corpus, with SEM bars on both axes.
d_resid %>%
#filter(corpus != "bnc-brown") %>%
ggplot(aes(x=resid.sg.mean, y=resid.delta.mean)) +
geom_errorbar(aes(xmin=resid.sg.mean - resid.sg.sem,
xmax=resid.sg.mean + resid.sg.sem,
ymin=resid.delta.mean - resid.delta.sem,
ymax=resid.delta.mean + resid.delta.sem), alpha=0.3) +
geom_smooth(method="lm", se=T) +
geom_point(stat="identity", position="dodge", alpha=1, size=4, aes(shape=model, color=training)) +
ylab(paste("Residual", metric)) +
xlab("Residual Syntax Generalization Score") +
ggtitle("Syntactic Generalization vs. Predictive Power") +
scale_color_manual(values = c("bllip-lg"="#440154FF",
"bllip-md"="#39568CFF",
"bllip-sm"="#1F968BFF",
"bllip-xs"="#73D055FF",
"bllip-lg-gptbpe"="#888888",
"bllip-md-gptbpe"="#AAAAAA",
"bllip-sm-gptbpe"="#CCCCCC",
"bllip-xs-gptbpe"="#CCCCCC")) +
facet_grid(.~corpus, scales="free") +
theme(axis.text=element_text(size=14),
strip.text.x = element_text(size=14),
legend.text=element_text(size=14),
axis.title=element_text(size=18),
legend.position = "right")
Ignoring unknown aesthetics: xmin, xmax
# NOTE(review): the warning above shows geom_errorbar ignores xmin/xmax, so the
# horizontal SEM bars are silently dropped; drawing them would need
# geom_errorbarh or geom_linerange -- confirm whether they are wanted.
ggsave("../images/cuny2020/ppl_sg.png",height=4.5,width=11)
# Nested-model comparison for one corpus: does SG score explain variance in the
# delta metric beyond a per-training-set perplexity term?
# Prints an F-test (anova) of the nested models and returns the richer model's
# summary (auto-printed at top level). `df` defaults to the global
# model_deltas, so existing one-argument call sites behave unchanged.
do_stepwise_regression = function(cur_corpus, df = model_deltas) {
  regression_data = df %>%
    filter(corpus == cur_corpus)
  print("----------------------")
  print(cur_corpus)
  # Baseline: per-training-set PPL slopes only; full model adds SG score.
  lm1 = lm(delta_test_mean ~ training:test_ppl, data = regression_data)
  lm2 = lm(delta_test_mean ~ training:test_ppl + sg_score, data = regression_data)
  print(anova(lm1, lm2))
  summary(lm2)
}
#do_stepwise_regression("bnc-brown")
do_stepwise_regression("dundee")
[1] "----------------------"
[1] "dundee"
Analysis of Variance Table
Model 1: delta_test_mean ~ training:test_ppl
Model 2: delta_test_mean ~ training:test_ppl + sg_score
Res.Df RSS Df Sum of Sq F Pr(>F)
1 20 2.8138e-05
2 19 1.4666e-05 1 1.3472e-05 17.453 0.0005107 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Call:
lm(formula = delta_test_mean ~ training:test_ppl + sg_score,
data = regression_data)
Residuals:
Min 1Q Median 3Q Max
-9.924e-04 -4.743e-04 -8.779e-05 3.257e-04 2.949e-03
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 6.399e-03 1.113e-03 5.751 1.53e-05 ***
sg_score -7.297e-03 1.747e-03 -4.178 0.000511 ***
trainingbllip-lg:test_ppl 3.790e-05 1.005e-05 3.772 0.001290 **
trainingbllip-lg-gptbpe:test_ppl 5.406e-04 3.419e-05 15.813 2.17e-12 ***
trainingbllip-md:test_ppl 2.151e-05 8.598e-06 2.502 0.021650 *
trainingbllip-md-gptbpe:test_ppl 4.608e-04 2.472e-05 18.641 1.14e-13 ***
trainingbllip-sm:test_ppl 9.978e-07 7.516e-06 0.133 0.895775
trainingbllip-sm-gptbpe:test_ppl 9.626e-05 6.452e-06 14.919 6.05e-12 ***
trainingbllip-xs:test_ppl -5.457e-06 5.071e-06 -1.076 0.295433
trainingbllip-xs-gptbpe:test_ppl 3.182e-05 2.189e-06 14.534 9.58e-12 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.0008786 on 19 degrees of freedom
Multiple R-squared: 0.9864, Adjusted R-squared: 0.9799
F-statistic: 152.7 on 9 and 19 DF, p-value: 8.608e-16
do_stepwise_regression("natural-stories")
[1] "----------------------"
[1] "natural-stories"
Analysis of Variance Table
Model 1: delta_test_mean ~ training:test_ppl
Model 2: delta_test_mean ~ training:test_ppl + sg_score
Res.Df RSS Df Sum of Sq F Pr(>F)
1 20 2.6111e-05
2 19 1.8309e-05 1 7.8013e-06 8.0957 0.01035 *
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Call:
lm(formula = delta_test_mean ~ training:test_ppl + sg_score,
data = regression_data)
Residuals:
Min 1Q Median 3Q Max
-0.0020214 -0.0003834 0.0000000 0.0003953 0.0016338
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 1.144e-02 1.243e-03 9.205 1.97e-08 ***
sg_score -5.553e-03 1.952e-03 -2.845 0.01035 *
trainingbllip-lg:test_ppl -2.704e-05 1.123e-05 -2.409 0.02633 *
trainingbllip-lg-gptbpe:test_ppl 1.892e-04 3.820e-05 4.953 8.82e-05 ***
trainingbllip-md:test_ppl -3.765e-05 9.606e-06 -3.920 0.00092 ***
trainingbllip-md-gptbpe:test_ppl 2.249e-04 2.762e-05 8.143 1.29e-07 ***
trainingbllip-sm:test_ppl -4.425e-05 8.398e-06 -5.269 4.37e-05 ***
trainingbllip-sm-gptbpe:test_ppl -6.129e-05 7.209e-06 -8.503 6.70e-08 ***
trainingbllip-xs:test_ppl -3.625e-05 5.666e-06 -6.398 3.90e-06 ***
trainingbllip-xs-gptbpe:test_ppl -4.201e-06 2.446e-06 -1.718 0.10211
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.0009817 on 19 degrees of freedom
Multiple R-squared: 0.9485, Adjusted R-squared: 0.9241
F-statistic: 38.87 on 9 and 19 DF, p-value: 2.319e-10
# Test perplexity vs. predictive power (delta mean log-likelihood), one
# panel per corpus, colored by training set and shaped by model class.
model_deltas %>%
  ggplot(aes(x = test_ppl, y = delta_test_mean, color = training)) +
  geom_errorbar(aes(ymin = delta_test_mean - delta_test_sem,
                    ymax = delta_test_mean + delta_test_sem),
                alpha = 0.4) +
  #geom_smooth(method="lm", se=F) +
  geom_point(stat = "identity", position = "dodge",
             alpha = 1, size = 4, aes(shape = model)) +
  labs(y = metric,
       x = "Test Perplexity",
       title = "Test Perplexity vs. Predictive Power") +
  #coord_cartesian(ylim = c(1, 16)) +
  # NOTE(review): sm-gptbpe and xs-gptbpe share #CCCCCC — possibly an
  # accidental duplicate; confirm before changing.
  scale_color_manual(values = c("bllip-lg" = "#440154FF",
                                "bllip-md" = "#39568CFF",
                                "bllip-sm" = "#1F968BFF",
                                "bllip-xs" = "#73D055FF",
                                "bllip-lg-gptbpe" = "#888888",
                                "bllip-md-gptbpe" = "#AAAAAA",
                                "bllip-sm-gptbpe" = "#CCCCCC",
                                "bllip-xs-gptbpe" = "#CCCCCC")) +
  facet_grid(~corpus, scales = "free") +
  #coord_cartesian(ylim = c(0, 150)) +
  theme(axis.text = element_text(size = 12),
        strip.text.x = element_text(size = 12),
        legend.text = element_text(size = 12),
        axis.title = element_text(size = 12),
        legend.position = "right")
ggsave("../images/cuny2020/ppl_loglik.png",height=4.5,width=11)
# Per-(model, corpus) Pearson correlation between predictive power and
# test perplexity.
#
# Fixed: the original extracted cor.test() components by position
# ([3] = p.value, [4] = estimate), which is opaque and breaks silently if
# the htest component order ever differs. Use the documented named
# components instead.
model_deltas %>%
  #filter(model != "5gram", training != "bllip-lg") %>%
  group_by(model, corpus) %>%
  #summarise(n = n())
  summarise(corr = unname(cor.test(delta_test_mean, test_ppl)$estimate),
            pval = cor.test(delta_test_mean, test_ppl)$p.value)
# Log training-set size vs. predictive power, faceted corpus x model,
# with per-model linear fits.
model_deltas %>%
  mutate(train_size = log(train_size)) %>%
  ggplot(aes(x = train_size, y = delta_test_mean, color = model)) +
  geom_errorbar(aes(ymin = delta_test_mean - delta_test_sem,
                    ymax = delta_test_mean + delta_test_sem),
                width = 0.1) +
  geom_smooth(method = "lm", se = TRUE, alpha = 0.5) +
  geom_point(stat = "identity", position = "dodge", alpha = 1, size = 3) +
  labs(y = metric,
       x = "Log Million Training Tokens",
       title = "Training Size vs. Predictive Power") +
  facet_grid(corpus ~ model, scales = "free") +
  #scale_color_manual(values = c("#A42EF1", "#3894C8")) +
  theme(axis.text = element_text(size = 14),
        strip.text.x = element_text(size = 14),
        legend.text = element_text(size = 14),
        axis.title = element_text(size = 18),
        legend.position = "bottom")
#ggsave("./cogsci_images/training_loglik.png",height=5,width=6)
# Log training-set size vs. syntactic generalization score, faceted by
# model, with per-model linear fits.
#
# Fixed: axis-label typo "SG SCore" -> "SG Score".
model_deltas %>%
  mutate(train_size = log(train_size)) %>%
  ggplot(aes(x = train_size, y = sg_score, color = model)) +
  geom_smooth(method = "lm", se = TRUE, alpha = 0.5) +
  geom_point(stat = "identity", position = "dodge", alpha = 1, size = 3) +
  ylab("SG Score") +
  xlab("Log Million Training Tokens") +
  ggtitle("Training Size vs. Syntactic Generalization") +
  #scale_color_manual(values = c("#A42EF1", "#3894C8")) +
  facet_grid(~model, scales = "free") +
  theme(axis.text = element_text(size = 14),
        strip.text.x = element_text(size = 14),
        legend.text = element_text(size = 14),
        axis.title = element_text(size = 18),
        legend.position = "bottom")
#ggsave("./cogsci_images/training_sg.png",height=5,width=6)
# Surprisal vs. psychometric measure (RT / gaze duration), smoothed per
# training set, faceted corpus x model. Surprisal restricted to (0, 15)
# bits to trim sparse tails.
all_data %>%
  filter(surprisal < 15, surprisal > 0) %>%
  ggplot(aes(x = surprisal, y = psychometric, color = training)) +
  stat_smooth(se = TRUE, alpha = 0.5) +
  #geom_errorbar(color="black", width=.2, position=position_dodge(width=.9), alpha=0.3) +
  #geom_point(stat="identity", position="dodge", alpha=1, size=3) +
  labs(y = "Processing Time (ms)",
       x = "Surprisal (bits)",
       title = "Surprisal vs. Reading Time / Gaze Duration") +
  facet_grid(corpus ~ model, scales = "free") +
  # NOTE(review): sm-gptbpe and xs-gptbpe share #CCCCCC — possibly an
  # accidental duplicate; confirm before changing.
  scale_color_manual(values = c("bllip-lg" = "#440154FF",
                                "bllip-md" = "#39568CFF",
                                "bllip-sm" = "#1F968BFF",
                                "bllip-xs" = "#73D055FF",
                                "bllip-lg-gptbpe" = "#888888",
                                "bllip-md-gptbpe" = "#AAAAAA",
                                "bllip-sm-gptbpe" = "#CCCCCC",
                                "bllip-xs-gptbpe" = "#CCCCCC")) +
  theme(axis.text = element_text(size = 14),
        axis.text.y = element_text(size = 10),
        strip.text.x = element_text(size = 14),
        legend.text = element_text(size = 14),
        axis.title = element_text(size = 18),
        legend.position = "right")
ggsave("../images/cuny2020/surp_corr.png",height=4.5,width=12)